Note. Boxplots display the interquartile range (IQR; center box), and the whiskers extend up to 1.5 × IQR beyond the lower and upper hinges. The white point indicates the mean and the white center line indicates the median.


Data Preparation

Data Import

# workers
# Initial data cleaning was done in SPSS.
# TODO: the original comment referenced the syntax-file location with an empty
# string ("") -- add the actual path to the SPSS syntax files.
dtWorker <- list(
  raw.pre = read_spss("data/S1_Workers/processed/cleaned/MT - Pre-Measure - 06-15-2018.sav"),
  raw.post = read_spss("data/S1_Workers/processed/cleaned/MT - Post-Measure - 06-15-2018.sav"),
  raw.morning = read_spss("data/S1_Workers/processed/cleaned/MT - Morning - 06-15-2018.sav"),
  raw.afternoon = read_spss("data/S1_Workers/processed/cleaned/MT - Afternoon - 06-15-2018.sav")
)

# students (raw CSV exports; header = TRUE spelled out instead of T)
dtStudents <- list(
  raw.pre = read.csv(file = "data/S2_Students/raw/AOTS_Pre.csv", header = TRUE, sep = ","),
  raw.post = read.csv(file = "data/S2_Students/raw/AOTS_Post.csv", header = TRUE, sep = ","),
  raw.daily = read.csv(file = "data/S2_Students/raw/AOTS_Daily.csv", header = TRUE, sep = ",")
)

# young medical professionals (data not yet available; empty placeholders)
dtMedical <- list(
  raw.pre = c(),
  raw.post = c(),
  raw.daily = c()
)

Data Cleaning & Data Exclusions

# Column names shared by the morning and the afternoon ESM questionnaires.
# The numbered item batteries (gr_type_*, gr_context_*, dyad_type_*,
# Context_*, desire_type_*, desire_context_*, NonDutchType_*) follow a
# regular scheme and are generated with paste0() instead of spelled out.
  names.m <- c(
    "StartDate", "EndDate", "Finished", "Duration__in_seconds_",
    "RecordedDate", "ExternalReference", "Meta_Operating_System",
    "Contact_dum", "number", "time", "duration_1", "dyad.group", "gr_size",
    paste0("gr_type_", 1:16), "gr_type_17_TEXT",
    paste0("gr_context_", 1:12), "gr_context_13_TEXT", "gr_context_14_TEXT",
    "gr_dutchness",
    paste0("dyad_type_", 1:16), "dyad_type_17_TEXT",
    paste0("Context_", 1:12), "Context_13_TEXT", "Context_14_TEXT",
    "keyMotive", "keymotive_fulfillemt_1", "keyMotive_Dutch_1",
    "autonomy_1", "competence_1",
    "relatedness_self_1", "relatedness_other_1",
    "qualityAccidental_1", "qualityVoluntary_1", "qualityCooperative_1",
    "qualityDutchy_1", "quality_overall_1", "quality_meaning_1",
    "quality_star_1", "wantInt",
    paste0("desire_type_", 1:16), "desire_type_17_TEXT",
    paste0("desire_context_", 1:12),
    "desire_context_13_TEXT", "desire_context_14_TEXT",
    "Reason_nodesire", "keyMotive_noInt", "keyMotive_noInt_fulf_1",
    "autonomy_NoInt_1", "competence_NoInt_1", "relatedness_1_NoInt_1",
    "thermometerDutch_1", "thermometerDutchInt_2", "ExWB_1",
    "alertness1", "calmness1", "valence1",
    "alertness2", "calmness2", "valence2",
    "inNonDutch", "NonDutchNum",
    paste0("NonDutchType_", 1:14), "NonDutchType_15_TEXT",
    "date", "time.0", "LocationLatitude", "LocationLongitude"
  )
  
  # The afternoon questionnaire additionally asks about the key interaction
  # of the day.
  names.a <- c(names.m, "keyInteraction_1", "keyInteractionTime")

# Create reduced data sets for morning and afternoon
# (subset the raw SPSS imports to the shared column set names.m / names.a)
  dat.mo <- dtWorker$raw.morning[, names.m]
  dat.mo$daytime <- "morning"   # tag measurement occasion
  
  dat.af <- dtWorker$raw.afternoon[, names.a]
  dat.af$daytime <- "afternoon"

# merge morning and afternoon measurements with indicator [+ clean up]
  # plyr::rbind.fill pads morning rows with NA for the two afternoon-only
  # columns (keyInteraction_1, keyInteractionTime)
  daily.dat <- rbind.fill(dat.mo, dat.af)
  # drop excluded participant 55951
  # NOTE(review): `!= 55951` yields NA for rows with a missing
  # ExternalReference, which produces all-NA rows here -- confirm none exist
  daily.dat <- daily.dat[daily.dat$ExternalReference != 55951, ]
  dtWorker$daily <- daily.dat
  rm(dat.mo, dat.af, names.m, names.a, daily.dat)


# Columns kept from the pre-measurement questionnaire: demographics,
# migration background, acculturation scales, baseline need / wellbeing
# scales, and the id used to link with the daily data.
  names.pre <- c(
    "Finished",
    "age", "Gender", "Living",
    paste0("roommate_", 1:3),
    "nationality", "SecondNationality", "timeNL_1",
    "Reason_2", "Reason_5", "Reason_7", "Reason_8_TEXT",
    "DutchLang",
    paste0("occupation_", c(1:4, 7)),
    "CurrentEducation_1", "education_level", "EduLang_2",
    "RUG_faculty", "Study.0", "association",
    "DutchMeetNum", "DutchFriends_1",
    "assimilation", "separation", "integration", "marginalization",
    "VIA_heritage", "VIA_Dutch",
    "SSAS_surrounding", "SSAS_privat", "SSAS_public",
    "autonomy", "relatedness", "competence",
    "anxiety", "swl",
    "alertness", "calmness", "valence",
    "date", "time", "City", "ZIP",
    "id"
  )
  
# reduced data set for pre measurement
  dat.pre.red <- dtWorker$raw.pre[, names.pre]

# merge with daily data [+ clean up]
  # full outer join (all = T): daily beeps without a pre-measure are kept
  # and vice versa
  df.pre <- merge(
    x = dtWorker$daily,
    y = dat.pre.red,
    by.x = "ExternalReference",
    by.y = "id",
    all = T
  )
  rm(names.pre)

# adjust duplicate names to indicate daily vs. pre measurement
  # merge() suffixes clashing columns with ".x"/".y"; rewrite those suffixes.
  # NOTE(review): the pattern "[[:punct:]]x" matches ANY punctuation followed
  # by "x"/"y" anywhere in a name, not just a trailing ".x"/".y" -- verify no
  # other column name contains such a sequence.
  names(df.pre) <- gsub("[[:punct:]]x", ".daily", names(df.pre))
  names(df.pre) <- gsub("[[:punct:]]y", ".pre", names(df.pre))

# names for post measurement
# (ID plus the acculturation / wellbeing scales re-assessed after the ESM week)
  names.post <- c(
    "ExternalReference",
    "assimilation",
    "separation",
    "integration",
    "marginalization",
    "VIA_heritage",
    "VIA_Dutch",
    "anxiety",
    "swl",
    "rosenberg",
    "social_support",
    "stress",
    "discrimination",
    "discrimination_month",
    "NLE_1month",
    "NLE_6month",
    "NLE_12month"
  )

# reduced data set for post-measurement  
  dat.post.red <- dtWorker$raw.post[, names.post]

# merge post measurement with pre- and daily data  
  # full outer join on the participant ID
  df <- merge(
    x = df.pre,
    y = dat.post.red,
    by.x = "ExternalReference",
    by.y = "ExternalReference",
    all = T
  )

# adjust duplicate names to indicate pre or post
  # NOTE(review): same caveat as above -- "[[:punct:]]x" is broader than the
  # ".x"/".y" suffixes merge() adds; verify against the actual column names
  names(df) <- gsub("[[:punct:]]x", ".pre", names(df))
  names(df) <- gsub("[[:punct:]]y", ".post", names(df))
  
# add to list
  dtWorker$combined <- df
  
# create data frame with cleaned data
  # keep only fully completed pre AND daily questionnaires with a valid ID
  df <- dtWorker$combined %>%
    filter(Finished.pre == 1,
           Finished.daily == 1,
           !is.na(ExternalReference))

# add running number as measurement ID within participants
  # data.table::rowidv: running count of rows per ExternalReference
  df$measureID = rowidv(df, cols = c("ExternalReference"))
  
  df <- df %>%
    mutate(
      PID = as.numeric(factor(ExternalReference)), # participant ID
      TID = measureID-1, # time ID with t0 = 0 for meaningful intercept interpretation
      date = substr(StartDate,1,10), # extract date part of the timestamp string (TODO: convert to Date class)
      time = substr(StartDate,12,19), # extract time part of the timestamp string
      daynum = as.numeric(factor(date)), # all days as numeric for ordering
      daycor = ifelse(daytime=="morning" & period_to_seconds(hms(time))<period_to_seconds(hms("12:00:00")) | daytime=="afternoon" & period_to_seconds(hms(time))<period_to_seconds(hms("19:00:00")),daynum-1,daynum), # submissions before the cut-off refer to the previous day; & binds tighter than |, so this reads (morning & <12:00) | (afternoon & <19:00)
      daycor.lead = sprintf("%02d", daycor), # zero-padded for lexical sorting
      daytime.lt = ifelse(daytime=="morning","a","b"), # morning / afternoon to a / b 
      day_time = paste(daycor.lead, daytime.lt, sep="_"), # combine day id with morning / afternoon
      session = as.numeric(factor(day_time)), # day and time identifier as numeric id
      SubTime = chron::times(time.0), # submission time as chron "times" object
      time.daily = as.character(time.daily),
      PPDate = as.Date(df$date.daily), # NOTE(review): uses df$ (the pre-mutate frame) instead of the piped column -- same values here, but fragile if date.daily is ever modified earlier in this mutate
      number = replace_na(number, 0), # no reported interaction -> count of 0
      NonDutchNum = replace_na(NonDutchNum, 0)
    )
  
  dtWorker$clean <- df
  
# clean up
  rm(df.pre, names.post, dat.post.red, dat.pre.red, df)

# Export reduced Data (disabled; enable to write the cleaned worker data)
  #write.csv(dtWorker$clean, "data/processed/MT_clean-merged_07-05-2018.csv", row.names = F)
  #save(dtWorker$clean, file = "data/processed/MT_clean-merged_07-05-2018.RData")
# our own test IDs
# (session identifiers produced by the researchers' own test runs of the
# student questionnaires; used below to exclude those rows)
ownIDs <- c(
  "beautifulLionfishXXXR5rcgVBzGu8hPvOqrK8UBJBw4owvi9nfRFSFu3lMzYhE",
  "niceDogoXXXmB8JI5SFu78SF3DVof84mGUPPNUr14p2HYFTtp31a6D1OwAzM6F-K",
  "amusedQuailXXXmhuc_fpTp8vPkMwDH1BzjaH1d1kHSO1bsPEfsnaEYk4WeVBfPi",
  "juwGAbtXX0_1kmZtSVqKh3PGaHOICqUyU4iBkrT3nDsI_uifuD1gzKcZerxaM5FL"
)

# Prepare dfs for Cleaning
# Each raw student file gets the same treatment: empty / literal "NA" strings
# become real NA, test runs and unfinished responses are dropped, and session
# is coerced to character for merging.
# NOTE(review): mutate_all() is superseded in current dplyr
# (mutate(across(everything(), ...))); kept as-is here.
df.pre <- dtStudents$raw.pre %>%
  mutate_all(na_if,"") %>%
  mutate_all(na_if,"NA") %>%
  filter(!is.na(ended)) %>% # remove all who did not finish
  filter(!e_mail %in% .$e_mail[duplicated(.$e_mail)]) %>% # remove everyone who took the pre questionnaire multiple times (b/c inconsistent rating scales)
  filter(!session %in% ownIDs) %>% # remove our own test runs
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.post <- dtStudents$raw.post %>%
  mutate_all(na_if,"") %>%
  mutate_all(na_if,"NA") %>%
  filter(!is.na(session)) %>% # remove rows without a session ID
  filter(!session %in% ownIDs) %>% # remove our own test runs
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  filter(!is.na(ended)) %>% # remove all who never finished
  filter(!session %in% .$session[duplicated(.$session)]) %>% # remove all duplicate sessions
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

df.daily <- dtStudents$raw.daily %>%
  mutate_all(na_if,"") %>%
  mutate_all(na_if,"NA") %>%
  filter(!session %in% ownIDs) %>% # remove our own test runs
  filter(session %in% df.pre$session) %>% # remove anyone who wasn't in the pre
  filter(!is.na(ended)) %>% # remove all who never finished
  mutate(session = as.character(session)) # turn factor into character strings (probably just precaution)

# merge daily with pre
# inner join (all = F): keep only sessions present in both frames
dfPreDaily = merge(
  x = df.daily,
  y = df.pre,
  by = "session",
  suffixes = c(".daily", ".pre"),
  all = F
)
  
# merge daily with post
# NOTE(review): x already carries ".daily"/".pre" suffixes from the first
# merge; any remaining clash here gets ".pre"/".post", which could mislabel
# an unsuffixed daily column as ".pre" -- verify the resulting column names
dfCombined = merge(
  x = dfPreDaily,
  y = df.post,
  by = "session",
  suffixes = c(".pre", ".post"),
  all = F
)

# add to list
dtStudents$clean <- dfCombined 

# clean up workspace
rm(df.pre, df.daily, df.post, dfPreDaily, dfCombined, ownIDs)
#TBD

Calculate needed transformations

df <- dtWorker$clean

# Time and Date Variables
  # strip the leading hour block from afternoon submission times and append
  # ":00" seconds -- NOTE(review): assumes time.0 is an "H:MM:SS"-like string; confirm
  df$SubTime[df$daytime == "afternoon"] = paste0(substring(as.character(df$time.0[df$daytime == "afternoon"]),4,8),":00") 
  # same fix for time.daily; the original guard `!is.na(df$time.daily != "<NA>")`
  # is logically identical to `!is.na(df$time.daily)` (comparing NA yields NA),
  # so it is written directly here
  df$time.daily[df$daytime == "afternoon" & !is.na(df$time.daily)] = paste0(substring(as.character(df$time.daily[df$daytime == "afternoon" & !is.na(df$time.daily)]),4,8),":00")
  
  # Correct morning / afternoon date where the survey was collected the day
  # after: early submissions refer to the previous (targeted) day
  df$PPDate[df$SubTime < "11:50:00" & df$daytime == "morning"] = df$PPDate[df$SubTime < "11:50:00" & df$daytime == "morning"]-1
  df$PPDate[df$SubTime < "18:50:00" & df$daytime == "afternoon"] = df$PPDate[df$SubTime < "18:50:00" & df$daytime == "afternoon"]-1
  
# Mood scales
  # collapse the two item variants into one daily score.
  # NOTE(review): rowSums(..., na.rm = TRUE) yields 0 (not NA) when BOTH
  # items are missing -- confirm this is intended
  df$calmness.daily = rowSums(df[, c("calmness1", "calmness2")], na.rm = TRUE)
  df$alertness.daily = rowSums(df[, c("alertness1", "alertness2")], na.rm = TRUE)
  df$valence.daily = rowSums(df[, c("valence1", "valence2")], na.rm = TRUE)
  
# Need scales
  # combine the interaction / no-interaction variants of each need item
  df$keyMotiveFulfilled = rowSums(df[,c("keymotive_fulfillemt_1","keyMotive_noInt_fulf_1")], na.rm=TRUE)
  df$autonomy.daily.all = rowSums(df[,c("autonomy_1","autonomy_NoInt_1")], na.rm=TRUE)
  df$competence.daily.all = rowSums(df[,c("competence_1","competence_NoInt_1")], na.rm=TRUE)
  #cor(df$relatedness_other_1, df$relatedness_self_1,use="complete.obs")
  df$relatedness.daily.all = rowMeans(df[,c("relatedness_other_1","relatedness_self_1","relatedness_1_NoInt_1")], na.rm=TRUE)
  df$relatedness.daily.int = rowMeans(df[,c("relatedness_other_1","relatedness_self_1")], na.rm=TRUE)

# summarize by participant
# mutate() after group_by() keeps one row per beep while attaching the
# person-level aggregates to every row of that participant
  between <- df %>%
    group_by(ExternalReference) %>%
    mutate(
      CtContactNL = sum(Contact_dum), # NOTE(review): no na.rm -- NA if any beep is missing; confirm intended
      CtContactNonNl = sum(inNonDutch),
      CtContactNLAll = sum(number),
      CtContactNonNlAll = sum(NonDutchNum),
      AvKeyNeed = mean(keyMotiveFulfilled, na.rm=T),
      AvKeyNeedInt = mean(keymotive_fulfillemt_1, na.rm=T),
      AvKeyNeedNoInt = mean(keyMotive_noInt_fulf_1, na.rm=T),
      AvAutonomy = mean(autonomy.daily.all, na.rm=T),
      AvCompetence = mean(competence.daily.all, na.rm=T),
      AvRelatedness = mean(relatedness.daily.all, na.rm=T),
      AvThermo = mean(thermometerDutch_1, na.rm=T),
      AvWB = mean(ExWB_1, na.rm=T)) %>%
    ungroup() %>%
    mutate(
      # centered (_c) and standardized (_z) versions; computed AFTER
      # ungroup(), i.e. across all beeps of all participants -- this is what
      # the warning below refers to
      CtContactNL_c = scale(CtContactNL, scale = FALSE),
      AvKeyNeedInt_c = scale(AvKeyNeedInt, scale = FALSE),
      AvKeyNeed_c = scale(AvKeyNeed, scale = FALSE),
      CtContactNL_z = scale(CtContactNL, scale = TRUE),
      AvKeyNeedInt_z = scale(AvKeyNeedInt, scale = TRUE),
      AvKeyNeed_z = scale(AvKeyNeed, scale = TRUE)
    ) 
  
  warning("some variable transformations (esp. _c and _z) might be across all participants (i.e., not within PP).")
  
  dtWorker$full <- between
  
  rm(df, between)
  
  #save(df.btw, file = "data/processed/df.btw.RData")  
  #write_sav(df.btw, "data/processed/MT_clean-merged_pre-post.sav")
  
# export data to Mplus
  # df.mplus = remove_all_labels(select(df, PID, session, thermometerDutch_1, inNonDutch, Contact_dum, keyMotiveFulfilled, autonomy.daily.all, competence.daily.all, relatedness.daily.all))
  # names(df.mplus)= c("PID", "session", "att", "intin", "intout", "keymot", "aut", "comp", "rel")
  # mplus = df.mplus[order(df.mplus$PID, df.mplus$session),]
  # mplus.intcont = mplus[mplus$intout==1,]
  # prepareMplusData(mplus.intcont, "data/processed/dynamic-subset-intonly.dat")
df <- dtStudents$clean

# Add ID variables
  df$PID = as.numeric(factor(df$session)) # participant ID (numeric recoding of the session hash)

# order time
  # factor levels follow the order in which date_period appears in the raw
  # daily file -- presumably chronological; TODO confirm
  df$TID <- factor(df$date_period, levels = unique(dtStudents$raw.daily$date_period))
  df$TIDnum <- as.numeric(df$TID) #get numeric TID
  
  # check whether time ordering worked
  df <- df %>%
    arrange(PID, TID) #%>%
    #View()
  
# Interaction as Factor
  df$interaction.f <- factor(df$Interaction, levels = c("no interaction", "Dutch", "Non-Dutch"))
  df$intNL <- ifelse(df$Interaction=="Dutch",1,0)       # dummy: interaction with a Dutch person
  df$intNonNL<- ifelse(df$Interaction=="Non-Dutch",1,0) # dummy: interaction with a non-Dutch person
   
# ------------------------------------------------------------------------------------------------------------- 
#                                       Combine Variables
# -------------------------------------------------------------------------------------------------------------
# Relatedness
  # correlation / scatter panel of the two interaction-relatedness items
  pairs.panels.new(df[c("RelatednessSelf","RelatednessOther")], 
                              labels = c("I shared information about myself.", "X shared information about themselves."))

  df$RelatednessInteraction <- rowMeans(df[c("RelatednessSelf","RelatednessOther")], na.rm=T)
  # rowMeans(..., na.rm = TRUE) returns NaN when both items are missing;
  # recode those to NA. is.nan() replaces the previous comparison against the
  # string "NaN", which relied on implicit numeric-to-character coercion.
  df$RelatednessInteraction[is.nan(df$RelatednessInteraction)] <- NA
# Relatedness Overall (mean of the interaction and no-interaction variants)
  df$Relatedness <- rowMeans(df[,c("RelatednessInteraction", "RelatednessNoInteraction")], na.rm=T)
# Pro-Sociality (mean of the four pro-social behaviour items)
  df$ProSo <- rowMeans(df[,c("ProSo1", "ProSo2", "ProSo3", "ProSo4")], na.rm=T)
# Anti-Sociality (mean of the four anti-social behaviour items)
  df$AntiSo <- rowMeans(df[,c("AntiSo1", "AntiSo2", "AntiSo3", "AntiSo4")], na.rm=T)


# ------------------------------------------------------------------------------------------------------------- 
#                                 Add Variables related to interaction partner
# -------------------------------------------------------------------------------------------------------------
# create function for later lapply
# For the rows of ONE participant, resolve the interaction partner's name
# (CC2) and copy that partner's attributes (closeness, gender, ethnicity,
# relationship) from the beep(s) on which the partner was described with a
# free-text name (NewName).
#
# inp: data.frame with one participant's beeps, containing CC, NewCC,
#      NewName, NewCloseness, NewGender, NewEthnicity, NewRelationship
# returns: the prepared data.frame with CC2 plus the looked-up
#      closeness / gender / ethnicity / relationship columns
createIntPartDf <- function(inp) {
  
  # prepare the dataframe so that we can forloop over it later
    tmp <- data.frame(CC = as.character(inp$CC),
                    NewCC = as.character(inp$NewCC),
                    NewName = as.character(inp$NewName),
                    NewCloseness = inp$NewCloseness,
                    NewGender = inp$NewGender,
                    NewEthnicity = as.character(inp$NewEthnicity),
                    NewRelationship = as.character(inp$NewRelationship))
    
    # CC2 holds the resolved partner name: use the free-text NewName when the
    # partner was newly described (CC == 1 or "SOMEONE ELSE"), else the CC entry
    tmp$CC2 <- recode(tmp$CC, 'SOMEONE ELSE' = "NA")
    tmp$CC2 <- ifelse(tmp$CC == 1 | tmp$CC == "SOMEONE ELSE", as.character(tmp$NewName), as.character(tmp$CC2)) 
    # strip one leading whitespace character so names match across beeps
    tmp$CC2 <- gsub("^[[:space:]]", "", tmp$CC2)
    tmp$NewName <- gsub("^[[:space:]]", "", tmp$NewName)
    
  # preallocate the variables that will be filled up in the for-loop
    tmp$closeness <- rep(NA, nrow(tmp))
    tmp$gender <- rep(NA, nrow(tmp))
    tmp$ethnicity <- rep(NA, nrow(tmp))
    tmp$relationship <- rep(NA, nrow(tmp))
  
    # Find the variables related to the name of the interaction partner. For a
    # repeating partner (CC2) the value is taken from the first non-missing
    # match of CC2 against NewName (na.omit + [1]).
    # seq_len() instead of 1:nrow() so a zero-row input is handled safely.
      for (i in seq_len(nrow(tmp))) {
         if (is.na(tmp$CC2[i])) {
           next
         } else {
           tmp$closeness[i] <- na.omit(tmp$NewCloseness[as.character(tmp$CC2[i]) == as.character(tmp$NewName)])[1] # first non-missing closeness where CC2 matches NewName
           tmp$gender[i] <- na.omit(tmp$NewGender[as.character(tmp$CC2[i]) == as.character(tmp$NewName)])[1]
           tmp$ethnicity[i] <- na.omit(as.character(tmp$NewEthnicity[as.character(tmp$CC2[i]) == as.character(tmp$NewName)]))[1] # NOTE: NewEthnicity contains many NAs, so this can stay NA
           tmp$relationship[i] <- na.omit(as.character(tmp$NewRelationship[as.character(tmp$CC2[i]) == as.character(tmp$NewName)]))[1]
         }
      }
      
    out <- tmp
    out
}

# split df per participants and run function
  PP <- split(df, df$PID)
  PP <- lapply(PP, createIntPartDf); rm(createIntPartDf)
  
# add variables back to df
  # NOTE(review): cbind() relies on do.call(rbind, ...) reproducing df's exact
  # row order; this holds because df was arranged by PID above and split()
  # orders by PID as well -- re-check if the arrange step ever changes
  remergePP <- do.call(rbind.data.frame, PP)
    colnames(remergePP) <- paste(colnames(remergePP), "_Calc", sep = "") # suffix to mark derived columns
df <- cbind(df, remergePP);rm(remergePP)

# ------------------------------------------------------------------------------------------------------------- 
#                                 Center Relevant Variables
# -------------------------------------------------------------------------------------------------------------

# Within-person (cluster-mean) centering: .cm is the participant's mean,
# .cwc is each beep's deviation from that mean.
df <- df %>%
  group_by(PID) %>% 
  mutate(
    KeyNeedFullfillment.cm = mean(KeyNeedFullfillment, na.rm = TRUE), # cluster mean (mean of PP)
    KeyNeedFullfillment.cwc = KeyNeedFullfillment - KeyNeedFullfillment.cm, # cluster mean centered (within PP centered)
    closeness.cm = mean(closeness_Calc, na.rm = TRUE),
    closeness.cwc = closeness_Calc - closeness.cm
    ) %>%  
  ungroup()

# store
  dtStudents$full = df
# TBD

Worker Sample

Data Description

Still in ‘scr/workerDescriptive.R’. Needs to be merged with this document.

Contact Hypothesis

Interaction Frequency and Outgroup Attitudes

\[\begin{equation} \tag{1} r_{ContactFrequency, OutgroupAttitudes} \neq 0 \end{equation}\]

# prepare data
# Aggregate the ESM beeps to one row per participant: number of beeps,
# count / percentage of beeps with Dutch contact, total reported Dutch
# contacts, and mean outgroup attitude (feeling thermometer).
workerContactFreq <- dtWorker$full %>% 
  group_by(ExternalReference) %>%
  summarise(
    n = n(),
    SumContactNL = sum(Contact_dum),
    PercContactNL = SumContactNL/n*100,
    SumContactNLAll = sum(number),
    AvAttitude = mean(thermometerDutch_1, na.rm=T)
  ) %>%
  mutate(
    # Winsorize the skewed contact sums (DescTools default cut-offs)
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll)
  )

# correlation panel
# NOTE(review): "Numer" in the first label is a typo in the figure text
pairs.panels.new(workerContactFreq %>% select(SumContactNL, SumContactNLAll, AvAttitude), 
                 labels = c("Sum:\nNumer of beeps with Outgroup Contact (NL)", 
                            "Sum:\nNumber of Outgroup Contacts (NL)", 
                            "Mean:\nOutgroup Attitudes (NL)"))

# correlation panel with interaction sums winsorized
pairs.panels.new(workerContactFreq %>% select(WinSumContactNL, WinSumContactNLAll, AvAttitude), 
                 labels = c("Sum:\nNumer of beeps with Outgroup Contact (NL)\n[Winsorized]", 
                            "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]", 
                            "Mean:\nOutgroup Attitudes (NL)"))

TL;DR: Neither the number of interactions nor the number of measurement beeps with an interaction are significantly related with the average outgroup attitudes. This is to say that within our data participants with more outgroup interactions did not have significantly more positive outgroup attitudes. This might be due to the aggregation within the participants or the small sample size of between participant data. Nonetheless, the aggregate data does not support the notion that simply having more interactions with an outgroup results in more positive outgroup attitudes.

Outgroup Attitudes by Interaction Type

\[\begin{equation} \tag{2} \mu_{i,0utgroupInteraction} > \mu_{i,IngroupInteraction} \\ Attitude = OutgroupInteraction \times NonOutgroupInteraction \end{equation}\]

Regression with interaction term

Across participants: impact of different interaction types [probably meaningless because ignores response sets between participants]

# Cell means and normal-approximation 95% CIs of outgroup attitude for each
# combination of outgroup (Dutch) and non-outgroup interaction at a beep
workerContactType <- dtWorker$full %>% 
  group_by(
    Contact_dum,
    inNonDutch
    ) %>%
  summarise(
    n = n(),
    AttitudeM = mean(thermometerDutch_1, na.rm=T),
    AttitudeSD = sd(thermometerDutch_1, na.rm=T),
    AttitudeSE = AttitudeSD/sqrt(n),
    # 95% confidence bounds (mean +/- 1.96 SE)
    AttitudeLwr = AttitudeM - 1.96*AttitudeSE,
    AttitudeUpr = AttitudeM + 1.96*AttitudeSE
  ) %>%
  ungroup %>%
  mutate(
    # readable cell label, e.g. "Out, NoIn"
    InteractionType = paste(
      ifelse(Contact_dum == 1, "Out", "NoOut"),
      ifelse(inNonDutch == 1, "In", "NoIn"),
      sep = ", "
    )
  )

# Bar chart of mean outgroup attitude per interaction-type cell
ggplot(workerContactType, aes(y = AttitudeM, x = as_factor(Contact_dum), fill = as_factor(inNonDutch))) +
  geom_bar(stat = "identity", 
                   color = "black", 
                   position=position_dodge()) +
  # NOTE(review): ymin = AttitudeM draws only the UPPER half of the 95% CI;
  # AttitudeLwr is computed above but unused, although the title announces a
  # full 95% CI -- confirm this is intentional
  geom_errorbar(aes(ymin=AttitudeM, ymax=AttitudeUpr), width=.2,
                 position=position_dodge(.9)) +
  labs(
    fill = "Non-Outgroup Interaction",
    x = "Outgroup Interaction",
    y = "Outgroup Attitude",
    title = "Outgroup Attitudes by Interaction Type [95% CI]"
  ) +
  scale_fill_grey(
    start = 0.2,
    end = 0.8
    ) +
  # label the y-axis with the verbal anchors of the feeling thermometer
  scale_y_continuous(
    limits = c(0, 100),
    breaks = c(0, 15, 30, 40, 50, 60, 70, 85, 100),
    labels = c("Very cold or unfavorable feelings 0°", 
               "Quite cold and unfavorable feelings 15°", 
               "Fairly cold and unfavorable feelings 30°", 
               "A bit cold and unfavorable feelings 40°", 
               "No feeling at all 50°", 
               "A bit warm and favorable feelings 60°", 
               "Fairly warm and favorable feelings 70° ", 
               "Quite warm and favorable feelings 85° ", 
               "Very warm and favorable feelings 100° ")
  ) +
  theme_Publication()

# prepare dataframe
# recode the 0/1 dummies as factors so lm() treats them categorically
workerInteractionType <- dtWorker$full %>%
  mutate(
    OutgroupInteraction = as_factor(Contact_dum),
    NonOutgroupInteraction = as_factor(inNonDutch)
  )

# regression
# plain OLS across ALL beeps -- deliberately ignores the nesting within
# participants (addressed by the multilevel models in the document)
lmWorkerInteraction <- lm(thermometerDutch_1 ~ OutgroupInteraction * NonOutgroupInteraction, data = workerInteractionType)
#summary(lmWorkerInteraction)
# apa_table(
#   apa_print(lmWorkerInteraction)$table, 
#   caption = "Regression across all measurement points [ignoring nested data]."
# ) 

# summ() summary with 95% CIs; center = TRUE mean-centers continuous predictors
summ(
  lmWorkerInteraction,
  confint = TRUE, 
  digits = 3,
  center = TRUE
     )
Observations 1225
Dependent variable thermometerDutch_1
Type OLS linear regression
F(3,1221) 4.867
0.012
Adj. R² 0.009
Est. 2.5% 97.5% t val. p
(Intercept) 69.507 67.899 71.116 84.796 0.000
OutgroupInteraction 3.620 0.378 6.862 2.191 0.029
NonOutgroupInteraction -0.513 -2.593 1.566 -0.484 0.628
OutgroupInteraction:NonOutgroupInteraction -0.098 -4.021 3.826 -0.049 0.961
Standard errors: OLS; Continuous predictors are mean-centered.

TL;DR: While controlling for interactions with non-Dutch people, outgroup attitudes were significantly higher when participants had an interaction with a Dutch person. The effect is relatively small (3.62 points on a 0–100 scale). More importantly, however, this analysis lumps all ESM beeps from every participant together and ignores that the data are nested within participants.

ML Regression with interaction term

Unconditional model

make a model with only the data but a random intercept (unconditional model)

\[\begin{equation} \tag{3} Attitude_{ti} = \gamma_{00} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

df <- dtWorker$full
# Create and save Model
  # Unconditional (intercept-only) model with a random intercept per
  # participant, fit twice: with lme4 for summ()'s Satterthwaite p-values and
  # with nlme for anova()/VarCorr() further down.
  Null.ML.r = lme4::lmer(thermometerDutch_1~1 + (1|PID),data=df) #use optim if it does not converge
  # optimizer fixed to "nlminb" -- the original "nlmimb" is a misspelling and
  # not a valid optimizer name, so it would not select nlminb as intended
  Null.ML = lme(thermometerDutch_1~1,random=~1|PID,data=df, control=list(opt="nlminb")) #use optim if it does not converge

# Get summary with p-values (Satterthwaite's method)
  #summary(Null.ML.r) #or with the lme function
  summ(Null.ML.r, digits = 3, center = TRUE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8805.880
BIC 8821.213
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.698
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 71.338 2.695 26.466 22.053 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.797
Residual 8.425
Grouping Variables
Group # groups ICC
PID 23 0.698
# generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1~1 + (1|PID),data=df),
  #                  method="boot",nsim=1000, 
  #                  parallel = "multicore", ncpus = 4, seed = 42),
  #          "output/tables/ML-Null-CI.csv")

# Variance components of the unconditional model
  v.null <- VarCorr(Null.ML)
  tau <- as.numeric(v.null[1])    # between-participant (intercept) variance, tau_00^2
  sigma <- as.numeric(v.null[2])  # within-participant (residual) variance, sigma^2
# Intraclass correlation: share of the total variance due to clustering
# of observations within participants
  ICC <- tau / (tau + sigma)
  ICC.Perc <- ICC * 100

ICC \(\tau_{00}^2 / (\tau_{00}^2 + \sigma^2)\): The ratio of the between-cluster variance to the total variance is called the Intraclass Correlation. It tells you the proportion of the total variance in Y that is accounted for by the clustering. (In this case the clustering means clustering observations per participant)

compare the random intercept model to a model without a random intercept (i.e., without levels at all)

# Create and save Model
  # single-level null model (no random intercept) as baseline for comparison.
  # optimizer fixed to "nlminb" -- the original "nlmimb" is a misspelling and
  # not a valid optimizer name
  Null.gls <- gls(thermometerDutch_1~1,data=df, control=list(opt="nlminb"))

# calculate Deviances manually (deviance = -2 * log-likelihood):
  Deviance.gls <- logLik(Null.gls)*-2
  Deviance.null <- logLik(Null.ML)*-2

# Compare the two null models:
  # likelihood-ratio test -- does the participant-level random intercept
  # improve model fit?
  anova(Null.gls, Null.ML) %>%
    as.data.frame %>%
    select(-call) %>%
    kbl(., 
        #label = "",
        caption = "Worker: Model Comparison",
        format = "html", 
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(.)),
        digits = 3)  %>%
  kable_styling(position = "left")
(#tab:Null.Model.GLS)Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
Null.gls 1 2 10133 10144 -5065 NA NA
Null.ML 2 3 8806 8821 -4400 1 vs 2 1329 0

We find that an estimated 69.76% of the variation in Feeling Thermometer scores is explained by between participant differences (clustering by PID). This is to say that 69.76% of the variance in any individual report of Attitudes towards the Dutch can be explained by the properties of the individual who provided the rating. And we find that including ‘participant’ as a predictor adds significantly to the model.

Add level one predictors

\[\begin{equation} \tag{4} Attitude_{ti} = \gamma_{00} + \gamma_{10}OutgroupInteraction_{ti} + \gamma_{10}NonOutgroupInteraction_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model
  # random-intercept model with both level-1 interaction dummies as predictors
  # (nlme fit kept for the anova() model comparison below)
  model.int = lme(thermometerDutch_1 ~ OutgroupInteraction + NonOutgroupInteraction, random=~1|PID,data=workerInteractionType)

# Get summary with p-values (Satterthwaite's method)
  # refit with lme4::lmer because summ() reports Satterthwaite d.f. for merMod objects
  summ(lmer(thermometerDutch_1 ~ OutgroupInteraction + NonOutgroupInteraction + (1|PID),data=workerInteractionType), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8788.218
BIC 8813.771
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.703
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.343 65.002 75.683 25.814 22.897 0.000
OutgroupInteraction 2.477 1.364 3.589 4.365 1204.135 0.000
NonOutgroupInteraction 0.427 -0.683 1.538 0.754 1204.911 0.451
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.811
Residual 8.362
Grouping Variables
Group # groups ICC
PID 23 0.701
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1 ~ Contact_dum + inNonDutch + (1|PID),data=df), 
  #                  method="boot",nsim=1000, parallel = "multicore", 
  #                  ncpus = 4, seed = 42),
  #          "output/tables/ML-Inter-CI.csv")

# Compare new model to previous step
  # likelihood-ratio test: null model vs. model with level-1 predictors
  anova(Null.ML, model.int) %>%
    as.data.frame %>%
    select(-call) %>%
    kbl(., 
        #label = "",
        caption = "Worker: Model Comparison",
        format = "html", 
        linesep = "",
        booktabs = T,
        align = rep('c', ncol(.)),
        digits = 3)  %>%
  kable_styling(position = "left")
# Save variances
  # NOTE(review): lme4::VarCorr applied to an nlme::lme object -- works only
  # via generic dispatch; nlme::VarCorr(model.int) would be clearer
  v.int <- lme4::VarCorr(model.int) 

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 - (Var with Predictor / Var without Predictor)
    v.int.btw <- 1 - (as.numeric(v.int[1])/as.numeric(v.null[1]))
    v.int.btw.perc <-  (1 - (as.numeric(v.int[1])/as.numeric(v.null[1])))*100
  # and the estimate of within-group (or residual variance) explained:
    v.int.within <- 1 - (as.numeric(v.int[2])/as.numeric(v.null[2]))
    v.int.within.perc <- (1 - (as.numeric(v.int[2])/as.numeric(v.null[2])))*100

TL;DR: Interactions with Dutch people are a significant predictor of outgroup attitudes; interactions with non-Dutch people are not.

\[\begin{equation} \tag{5} Attitude_{ti} = \gamma_{00} + \gamma_{10}OutgroupInteraction_{ti} + \gamma_{20}NonOutgroupInteraction_{ti} + \gamma_{30}OutgroupInteraction_{ti} \times NonOutgroupInteraction_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model: random intercept plus the
# OutgroupInteraction x NonOutgroupInteraction interaction term (Eq. 5)
  model.intX = lme(thermometerDutch_1 ~ OutgroupInteraction * NonOutgroupInteraction, random=~1|PID,data=workerInteractionType)

# Get summary with p-values (Satterthwaite's method)
  # Refit with lme4::lmer because summ() reports Satterthwaite d.f. for lmer fits
  summ(lmer(thermometerDutch_1 ~ OutgroupInteraction * NonOutgroupInteraction + (1|PID),data=workerInteractionType), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8788.066
BIC 8818.731
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.703
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.297 64.942 75.652 25.728 23.140 0.000
OutgroupInteraction 2.651 0.798 4.505 2.803 1200.438 0.005
NonOutgroupInteraction 0.506 -0.789 1.801 0.765 1203.570 0.444
OutgroupInteraction:NonOutgroupInteraction -0.263 -2.496 1.970 -0.231 1200.071 0.817
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.812
Residual 8.365
Grouping Variables
Group # groups ICC
PID 23 0.701
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1 ~ Contact_dum + inNonDutch + (1|PID),data=df), 
  #                  method="boot",nsim=1000, parallel = "multicore", 
  #                  ncpus = 4, seed = 42),
  #          "output/tables/ML-Inter-CI.csv")

# Compare the interaction model against the main-effects model
# (likelihood-ratio test between nested fits), rendered as an HTML table.
  fit_comparison <- as.data.frame(anova(model.int, model.intX))
  fit_comparison <- select(fit_comparison, -call)
  fit_comparison %>%
    kbl(caption = "Worker: Model Comparison",
        format = "html",
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(fit_comparison)),
        digits = 3) %>%
    kable_styling(position = "left")
Table 2: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
model.int 1 5 8788 8814 -4389 NA NA
model.intX 2 6 8788 8819 -4388 1 vs 2 2.151 0.142
# Save variances (random-effect variance components of the interaction model)
  v.intX <- lme4::VarCorr(model.intX) 

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 – (Var with Predictor/Var without Predictor)
  # note: the baseline here is the previous step (model.int), not the null model
    v.intX.btw <- 1 - (as.numeric(v.intX[1])/as.numeric(v.int[1]))
    v.intX.btw.perc <-  (1 - (as.numeric(v.intX[1])/as.numeric(v.int[1])))*100
  # and the estimate of within-group (or residual variance) explained is:
    v.intX.within <- 1 - (as.numeric(v.intX[2])/as.numeric(v.int[2]))
    v.intX.within.perc <- (1 - (as.numeric(v.intX[2])/as.numeric(v.int[2])))*100

TL;DR: Interaction term does not add anything at all.

Check for Random Slopes

\[\begin{equation} \tag{6} Attitude_{ti} = \gamma_{00} + \gamma_{10}OutgroupInteraction_{ti} + \gamma_{20}NonOutgroupInteraction_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model (optimizer needed to reach convergence):
# random slopes for both interaction-type predictors in addition to the
# random intercept (Eq. 6)
  model.ran = lme(thermometerDutch_1~
                         OutgroupInteraction + NonOutgroupInteraction, 
                         random=~1+OutgroupInteraction+NonOutgroupInteraction|PID,
                       control=lmeControl(opt='optim'), data=workerInteractionType)

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
  # model.ran0 is the equivalent lme4 fit, kept for sjPlot diagnostics below
  summ(model.ran0 <- lmer(thermometerDutch_1 ~
                           OutgroupInteraction + NonOutgroupInteraction + 
                           (1+OutgroupInteraction+NonOutgroupInteraction|PID), 
                         data=workerInteractionType), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 1225
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 8793.695
BIC 8844.802
Pseudo-R² (fixed effects) 0.006
Pseudo-R² (total) 0.710
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 70.371 65.001 75.741 25.682 21.938 0.000
OutgroupInteraction 2.572 1.079 4.065 3.376 19.291 0.003
NonOutgroupInteraction 0.434 -0.693 1.562 0.755 205.804 0.451
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 12.883
PID OutgroupInteraction 2.248
PID NonOutgroupInteraction 0.424
Residual 8.307
Grouping Variables
Group # groups ICC
PID 23 0.706
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(model.ran0,
  #               method="boot",nsim=1000, 
  #               parallel = "multicore", ncpus = 4, seed = 42),
  #          "output/tables/ML-RandomSlopes-CI.csv")

# Compare the random-slopes model against the random-intercept model
# (likelihood-ratio test between nested fits), rendered as an HTML table.
  fit_comparison <- as.data.frame(anova(model.int, model.ran))
  fit_comparison <- select(fit_comparison, -call)
  fit_comparison %>%
    kbl(caption = "Worker: Model Comparison",
        format = "html",
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(fit_comparison)),
        digits = 3) %>%
    kable_styling(position = "left")
Table 3: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
model.int 1 5 8788 8814 -4389 NA NA
model.ran 2 10 8794 8845 -4387 1 vs 2 4.165 0.526
# Save variances (random-effect variance components of the random-slopes model)
  v.ran = lme4::VarCorr(model.ran)

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 – (Var with Predictor/Var without Predictor)
  # note: the baseline here is the random-intercept model (v.int)
    v.ran.btw <- 1 - (as.numeric(v.ran[1])/as.numeric(v.int[1]))
    v.ran.btw.perc <-  (1 - (as.numeric(v.ran[1])/as.numeric(v.int[1])))*100
  # and the estimate of within-group (or residual variance) explained is:
    v.ran.within <- 1 - (as.numeric(v.ran[2])/as.numeric(v.int[2]))
    v.ran.within.perc <- (1 - (as.numeric(v.ran[2])/as.numeric(v.int[2])))*100

# Assumption Checks: diagnostic plots for the lmer random-slopes fit
  # NOTE(review): `diag` masks base::diag for the remainder of the session
  diag = sjPlot::plot_model(model.ran0, type = "diag")
  # Arrange the four diagnostic panels (the second element is a per-group list, keyed by PID)
  grid.arrange(diag[[1]],diag[[2]]$`PID`,diag[[3]],diag[[4]])

# Plot observed vs. model-predicted outgroup attitudes, one facet per participant
  dat.new <- workerInteractionType[,c("thermometerDutch_1","session","PID")]
  # FIX: `model.ran` is an nlme::lme fit; the lme4-style argument
  # `re.form = NA` is silently swallowed by `...` in predict.lme, so the
  # default (innermost-level, subject-specific) predictions were returned.
  # `level = 0` is the nlme way to request the intended population-level
  # (fixed-effects only) predictions.
  dat.new$measure <- predict(model.ran, newdata = workerInteractionType, level = 0)

  # Blue line: model-implied trajectory; black line: observed attitudes
  (t.ppt = ggplot(data=dat.new, aes(x=session, y=measure)) + 
    geom_line(alpha=1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha=1)+
    facet_wrap(~PID, ncol = 6)+
    xlab("Time")+
    ylab("Outgroup Attitudes")+
    theme_bw())

  # Save the faceted trajectory plot
  ggsave(filename = "Figures/Development per PPT.png", t.ppt,
       width = 18, height = 12, dpi = 800, units = "cm", device='png')
  
  rm(dat.new)

TL;DR: Random slopes don’t add much for this super simple model.

Interaction Frequency and Interaction Quality

\[\begin{equation} \tag{7} Attitude = ContactFreq \times AverageContactQual \end{equation}\]

# prepare data: one row per participant (ExternalReference) with
# frequency sums and mean quality/attitude across all their beeps
workerAvFreQual <- dtWorker$full %>% 
  group_by(ExternalReference) %>%
  summarise(
    n = n(),                                   # number of beeps for this participant
    SumContactNL = sum(Contact_dum),           # beeps with outgroup (NL) contact
    PercContactNL = SumContactNL/n*100,        # same, as % of this participant's beeps
    SumContactNLAll = sum(number),             # total number of outgroup contacts reported
    AvAttitude = mean(thermometerDutch_1, na.rm = TRUE),
    AvQuality = mean(quality_overall_1, na.rm = TRUE)
  ) %>%
  mutate(
    # winsorized copies of the contact sums (DescTools defaults — TODO confirm limits)
    WinSumContactNL = DescTools::Winsorize(SumContactNL),
    WinSumContactNLAll = DescTools::Winsorize(SumContactNLAll)
  )

# correlation panel: contact-frequency and quality measures vs. average attitudes
# FIX: label typo "Numer" -> "Number" (user-facing plot text, both panels)
pairs.panels.new(workerAvFreQual %>% select(SumContactNL, SumContactNLAll, AvQuality, AvAttitude), 
                 labels = c("Sum:\nNumber of beeps with Outgroup Contact (NL)", 
                            "Sum:\nNumber of Outgroup Contacts (NL)", 
                            "Mean:\nInteraction Quality",
                            "Mean:\nOutgroup Attitudes (NL)"))

# same correlation panel with the interaction sums winsorized (outlier-robust)
pairs.panels.new(workerAvFreQual %>% select(WinSumContactNL, WinSumContactNLAll, AvQuality, AvAttitude), 
                 labels = c("Sum:\nNumber of beeps with Outgroup Contact (NL)\n[Winsorized]", 
                            "Sum:\nNumber of Outgroup Contacts (NL)\n[Winsorized]", 
                            "Mean:\nInteraction Quality",
                            "Mean:\nOutgroup Attitudes (NL)"))

TL;DR: There is a medium-sized correlation between the participants’ Average Interaction Quality and their Average Outgroup Attitudes. Thus, within our data, participants with higher-quality outgroup interactions also held more positive attitudes towards that group. However, the frequency of intergroup interactions had no meaningful correlation with either the average interaction quality or the average outgroup attitudes.

# regression: between-person moderation of average quality by contact frequency
# (aggregated data — the nesting within participants is ignored here)
lmworkerFreqInt <- lm(AvAttitude ~ SumContactNL * AvQuality, data = workerAvFreQual)
#summary(lmWorkerInteraction)
# apa_table(
#   apa_print(lmworkerFreqInt)$table, 
#   caption = "Regression across all measurement points [ignoring nested data]."
# ) 

# summary with CIs; predictors are mean-centered by summ()
summ(
  lmworkerFreqInt,
  confint = TRUE, 
  digits = 3,
  center = TRUE
     )
Observations 21 (2 missing obs. deleted)
Dependent variable AvAttitude
Type OLS linear regression
F(3,17) 6.663
R² 0.540
Adj. R² 0.459
Est. 2.5% 97.5% t val. p
(Intercept) 70.930 66.519 75.341 33.927 0.000
SumContactNL 0.269 -0.150 0.688 1.354 0.193
AvQuality 0.765 0.288 1.242 3.385 0.004
SumContactNL:AvQuality -0.049 -0.085 -0.014 -2.954 0.009
Standard errors: OLS; Continuous predictors are mean-centered.
# Simple-slopes plot of the AvQuality effect at levels of SumContactNL
interactions::interact_plot(lmworkerFreqInt, pred = AvQuality, modx = SumContactNL, interval = TRUE, partial.residuals = TRUE)

# Johnson-Neyman interval: range of SumContactNL where the AvQuality slope is significant
interactions::johnson_neyman(lmworkerFreqInt, pred = AvQuality, modx = SumContactNL, alpha = .05)
## JOHNSON-NEYMAN INTERVAL 
## 
## When SumContactNL is OUTSIDE the interval [23.66, 75.42], the slope of AvQuality is p < .05.
## 
## Note: The range of observed values of SumContactNL is [2.00, 51.00]

TL;DR: We find that interaction quality is significantly related to higher outgroup attitudes (albeit with a small effect size). We also find that, in our sample, with an increasing number of interactions the positive effect of interaction quality becomes weaker. However, note that this is based on data aggregated across all within-participant nuances and covers the data of only 21 people.

Need Fulfillment

Need fulfillment and Attitudes

Unconditional model

Fit an unconditional model (random intercept only, no predictors) to the outgroup interaction subset.

# see how large our outgroup interaction subset actually is 
# (cross-table of outgroup x non-outgroup interaction, row percentages)
tbl_cross(workerInteractionType, row=OutgroupInteraction, col=NonOutgroupInteraction, percent="row")
Characteristic Did you meet non-Dutch people this morning? (in person for at least 10 minutes) Total
no yes
Did you meet a Dutch person this morning? (In person interaction for at least 10 minutes)
No 337 (40%) 501 (60%) 838 (100%)
Yes 110 (28%) 277 (72%) 387 (100%)
Total 447 (36%) 778 (64%) 1,225 (100%)
# Keep only the beeps in which the participant reported an outgroup interaction
workerOutgroupInteraction <- filter(workerInteractionType, OutgroupInteraction == "Yes")

\[\begin{equation} \tag{8} Attitude_{ti} = \gamma_{00} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model: unconditional (intercept-only) model on the
# outgroup-interaction subset, fit with both lme4 and nlme
  Null.Out.ML.r <- lme4::lmer(thermometerDutch_1~1 + (1|PID),data=workerOutgroupInteraction) #use optim if it does not converge
  # FIX: optimizer name was misspelled "nlmimb"; use lmeControl() so the
  # choice is validated ("nlminb" is the intended optimizer; switch to
  # "optim" if it does not converge)
  Null.Out.ML <- lme(thermometerDutch_1~1,random=~1|PID, data=workerOutgroupInteraction, control=lmeControl(opt="nlminb"))

# Get summary with p-values (Satterthwaite's method)
  #summary(Null.ML.r) #or with the lme function
  summ(Null.Out.ML.r, digits = 3, center = TRUE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2863.460
BIC 2875.336
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.684
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 72.550 2.910 24.933 19.198 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.069
Residual 8.883
Grouping Variables
Group # groups ICC
PID 21 0.684
# generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1~1 + (1|PID),data=df),
  #                  method="boot",nsim=1000, 
  #                  parallel = "multicore", ncpus = 4, seed = 42),
  #          "output/tables/ML-Null-CI.csv")

# Save variances of the unconditional model for the outgroup subset
  v.out.null <- VarCorr(Null.Out.ML) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
  tau <- as.numeric(v.out.null[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
  # NOTE(review): this assignment masks stats::sigma for the rest of the session
  sigma <- as.numeric(v.out.null[2])
# The ICC estimate (between/between+within) is:
  ICC <- (as.numeric(v.out.null[1])/(as.numeric(v.out.null[1])+as.numeric(v.out.null[2])))
  ICC.Perc <- ((as.numeric(v.out.null[1])/(as.numeric(v.out.null[1])+as.numeric(v.out.null[2]))))*100

random intercept

\[\begin{equation} \tag{9} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model: random intercept with key-need fulfillment as
# level-1 predictor (Eq. 9)
  # NOTE(review): "keymotive_fulfillemt_1" is the column's actual (misspelled) name in the data
  model.keyneed = lme(thermometerDutch_1 ~ keymotive_fulfillemt_1, random=~1|PID,data=workerOutgroupInteraction)

# Get summary with p-values (Satterthwaite's method)
  # Refit with lme4::lmer because summ() reports Satterthwaite d.f. for lmer fits
  summ(lmer(thermometerDutch_1 ~ keymotive_fulfillemt_1 + (1|PID),data=workerOutgroupInteraction), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2842.675
BIC 2858.508
Pseudo-R² (fixed effects) 0.033
Pseudo-R² (total) 0.713
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 72.690 66.947 78.433 24.808 19.259 0.000
keymotive_fulfillemt_1 0.147 0.094 0.201 5.404 372.954 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.182
Residual 8.556
Grouping Variables
Group # groups ICC
PID 21 0.704
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1 ~ Contact_dum + inNonDutch + (1|PID),data=df), 
  #                  method="boot",nsim=1000, parallel = "multicore", 
  #                  ncpus = 4, seed = 42),
  #          "output/tables/ML-Inter-CI.csv")

# Compare the key-need model against the unconditional model
# (likelihood-ratio test between nested fits), rendered as an HTML table.
  fit_comparison <- as.data.frame(anova(Null.Out.ML, model.keyneed))
  fit_comparison <- select(fit_comparison, -call)
  fit_comparison %>%
    kbl(caption = "Worker: Model Comparison",
        format = "html",
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(fit_comparison)),
        digits = 3) %>%
    kable_styling(position = "left")
Table 4: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
Null.Out.ML 1 3 2863 2875 -1429 NA NA
model.keyneed 2 4 2843 2858 -1417 1 vs 2 22.79 0
# Save variances (random-effect variance components of the key-need model)
  v.keyneed <- lme4::VarCorr(model.keyneed) 

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 – (Var with Predictor/Var without Predictor)
  # baseline: the unconditional model on the outgroup subset (v.out.null)
    v.keyneed.btw <- 1 - (as.numeric(v.keyneed[1])/as.numeric(v.out.null[1]))
    v.keyneed.btw.perc <-  (1 - (as.numeric(v.keyneed[1])/as.numeric(v.out.null[1])))*100
  # and the estimate of within-group (or residual variance) explained is:
    v.keyneed.within <- 1 - (as.numeric(v.keyneed[2])/as.numeric(v.out.null[2]))
    v.keyneed.within.perc <- (1 - (as.numeric(v.keyneed[2])/as.numeric(v.out.null[2])))*100

check for random slope

\[\begin{equation} \tag{10} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model (optimizer needed to reach convergence):
# adds a random slope for key-need fulfillment (Eq. 10)
  model.keyneed.ran = lme(thermometerDutch_1~
                         keymotive_fulfillemt_1, 
                         random=~1+keymotive_fulfillemt_1|PID,
                       control=lmeControl(opt='optim'), data=workerOutgroupInteraction)

# Get summary with p-values (Satterthwaite's method) [+ save model for plotting]
  # model.keyneed.ran0 is the equivalent lme4 fit, kept for sjPlot diagnostics below
  summ(model.keyneed.ran0 <- lmer(thermometerDutch_1 ~
                           keymotive_fulfillemt_1 + 
                           (1+keymotive_fulfillemt_1|PID), 
                         data=workerOutgroupInteraction), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 387
Dependent variable thermometerDutch_1
Type Mixed effects linear regression
AIC 2815.398
BIC 2839.148
Pseudo-R² (fixed effects) 0.046
Pseudo-R² (total) 0.763
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 71.863 66.133 77.594 24.578 19.326 0.000
keymotive_fulfillemt_1 0.179 0.061 0.297 2.970 18.186 0.008
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 13.111
PID keymotive_fulfillemt_1 0.219
Residual 7.941
Grouping Variables
Group # groups ICC
PID 21 0.732
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(model.ran0,
  #               method="boot",nsim=1000, 
  #               parallel = "multicore", ncpus = 4, seed = 42),
  #          "output/tables/ML-RandomSlopes-CI.csv")

# Compare the random-slope key-need model against the random-intercept version
# (likelihood-ratio test between nested fits), rendered as an HTML table.
  fit_comparison <- as.data.frame(anova(model.keyneed, model.keyneed.ran))
  fit_comparison <- select(fit_comparison, -call)
  fit_comparison %>%
    kbl(caption = "Worker: Model Comparison",
        format = "html",
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(fit_comparison)),
        digits = 3) %>%
    kable_styling(position = "left")
Table 5: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
model.keyneed 1 4 2843 2858 -1417 NA NA
model.keyneed.ran 2 6 2815 2839 -1402 1 vs 2 31.28 0
# Save variances (random-effect variance components of the random-slope model)
  v.keyneed.ran = lme4::VarCorr(model.keyneed.ran)

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 – (Var with Predictor/Var without Predictor)
  # baseline: the random-intercept key-need model (v.keyneed)
    v.keyneed.ran.btw <- 1 - (as.numeric(v.keyneed.ran[1])/as.numeric(v.keyneed[1]))
    v.keyneed.ran.btw.perc <-  (1 - (as.numeric(v.keyneed.ran[1])/as.numeric(v.keyneed[1])))*100
  # and the estimate of within-group (or residual variance) explained is:
    v.keyneed.ran.within <- 1 - (as.numeric(v.keyneed.ran[2])/as.numeric(v.keyneed[2]))
    v.keyneed.ran.within.perc <- (1 - (as.numeric(v.keyneed.ran[2])/as.numeric(v.keyneed[2])))*100

# Assumption Checks: diagnostic plots for the lmer random-slope fit
  # NOTE(review): `diag` masks base::diag for the remainder of the session
  diag = sjPlot::plot_model(model.keyneed.ran0, type = "diag")
  # Arrange the four diagnostic panels (the second element is a per-group list, keyed by PID)
  grid.arrange(diag[[1]],diag[[2]]$`PID`,diag[[3]],diag[[4]])

# Plot observed vs. model-predicted outgroup attitudes, one facet per participant
  dat.new <- workerOutgroupInteraction[,c("thermometerDutch_1","session","PID")]
  # FIX: `model.keyneed.ran` is an nlme::lme fit; the lme4-style argument
  # `re.form = NA` is silently swallowed by `...` in predict.lme, so the
  # default (innermost-level, subject-specific) predictions were returned.
  # `level = 0` is the nlme way to request the intended population-level
  # (fixed-effects only) predictions.
  dat.new$measure <- predict(model.keyneed.ran, newdata = workerOutgroupInteraction, level = 0)

  # Blue line: model-implied trajectory; black line: observed attitudes
  (t.ppt = ggplot(data=dat.new, aes(x=session, y=measure)) + 
    geom_line(alpha=1, color = "blue") +
    geom_line(aes(y = thermometerDutch_1), alpha=1)+
    facet_wrap(~PID, ncol = 6)+
    xlab("Time")+
    ylab("Outgroup Attitudes")+
    theme_bw())

  # Save the faceted trajectory plot
  ggsave(filename = "Figures/Development per PPT Out Keyneed.png", t.ppt,
       width = 18, height = 12, dpi = 800, units = "cm", device='png')
  
  rm(dat.new)

Need fulfillment and Interaction Quality

Unconditional model

Fit an unconditional model (random intercept only, no predictors) for interaction quality in the outgroup interaction subset.

\[\begin{equation} \tag{11} InteractionQuality_{ti} = \gamma_{00} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model: unconditional (intercept-only) model for interaction
# quality on the outgroup-interaction subset, fit with both lme4 and nlme
  Null.Out.Qual.ML.r <- lme4::lmer(quality_overall_1~1 + (1|PID),data=workerOutgroupInteraction) #use optim if it does not converge
  # FIX: optimizer name was misspelled "nlmimb"; use lmeControl() so the
  # choice is validated ("nlminb" is the intended optimizer; switch to
  # "optim" if it does not converge)
  Null.Out.Qual.ML <- lme(quality_overall_1~1,random=~1|PID, data=workerOutgroupInteraction, control=lmeControl(opt="nlminb"))

# Get summary with p-values (Satterthwaite's method)
  #summary(Null.ML.r) #or with the lme function
  summ(Null.Out.Qual.ML.r, digits = 3, center = TRUE)
Observations 387
Dependent variable quality_overall_1
Type Mixed effects linear regression
AIC 3347.534
BIC 3359.410
Pseudo-R² (fixed effects) 0.000
Pseudo-R² (total) 0.183
Fixed Effects
Est. S.E. t val. d.f. p
(Intercept) 24.285 2.090 11.619 20.156 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 8.286
Residual 17.511
Grouping Variables
Group # groups ICC
PID 21 0.183
# generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1~1 + (1|PID),data=df),
  #                  method="boot",nsim=1000, 
  #                  parallel = "multicore", ncpus = 4, seed = 42),
  #          "output/tables/ML-Null-CI.csv")

# Save variances of the unconditional interaction-quality model
  v.out.qual.null <- VarCorr(Null.Out.Qual.ML) # save variances
# The estimate of (between-group or Intercept variance, tau_{00}^2):
  # NOTE(review): tau/sigma/ICC are reused across sections, overwriting earlier values
  tau <- as.numeric(v.out.qual.null[1])
# and the estimate of (within-group or residual variance, sigma^2) is:
  sigma <- as.numeric(v.out.qual.null[2])
# The ICC estimate (between/between+within) is:
  ICC <- (as.numeric(v.out.qual.null[1])/(as.numeric(v.out.qual.null[1])+as.numeric(v.out.qual.null[2])))
  ICC.Perc <- ((as.numeric(v.out.qual.null[1])/(as.numeric(v.out.qual.null[1])+as.numeric(v.out.qual.null[2]))))*100

random intercept

\[\begin{equation} \tag{12} InteractionQuality_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

# Create and save Model: interaction quality predicted by key-need
# fulfillment, random intercept per participant (Eq. 12)
  model.qual.keyneed = lme(quality_overall_1 ~ keymotive_fulfillemt_1, random=~1|PID,data=workerOutgroupInteraction)

# Get summary with p-values (Satterthwaite's method)
  # Refit with lme4::lmer because summ() reports Satterthwaite d.f. for lmer fits
  summ(lmer(quality_overall_1 ~ keymotive_fulfillemt_1 + (1|PID),data=workerOutgroupInteraction), 
       confint = TRUE, digits = 3, center = TRUE)
Observations 387
Dependent variable quality_overall_1
Type Mixed effects linear regression
AIC 3287.252
BIC 3303.085
Pseudo-R² (fixed effects) 0.179
Pseudo-R² (total) 0.303
Fixed Effects
Est. 2.5% 97.5% t val. d.f. p
(Intercept) 24.551 21.086 28.016 13.886 19.850 0.000
keymotive_fulfillemt_1 0.417 0.321 0.513 8.528 360.783 0.000
p values calculated using Satterthwaite d.f. ; Continuous predictors are mean-centered.
Random Effects
Group Parameter Std. Dev.
PID (Intercept) 6.814
Residual 16.157
Grouping Variables
Group # groups ICC
PID 21 0.151
# Generate 95% parametric bootstrap CIs (and save them as a csv-file):
  #write.csv(confint(lmer(thermometerDutch_1 ~ Contact_dum + inNonDutch + (1|PID),data=df), 
  #                  method="boot",nsim=1000, parallel = "multicore", 
  #                  ncpus = 4, seed = 42),
  #          "output/tables/ML-Inter-CI.csv")

# Compare the quality~key-need model against the unconditional quality model
# (likelihood-ratio test between nested fits), rendered as an HTML table.
  fit_comparison <- as.data.frame(anova(Null.Out.Qual.ML, model.qual.keyneed))
  fit_comparison <- select(fit_comparison, -call)
  fit_comparison %>%
    kbl(caption = "Worker: Model Comparison",
        format = "html",
        linesep = "",
        booktabs = TRUE,
        align = rep('c', ncol(fit_comparison)),
        digits = 3) %>%
    kable_styling(position = "left")
Table 6: Worker: Model Comparison
Model df AIC BIC logLik Test L.Ratio p-value
Null.Out.Qual.ML 1 3 3348 3359 -1671 NA NA
model.qual.keyneed 2 4 3287 3303 -1640 1 vs 2 62.28 0
# Save variances (random-effect variance components of the quality~key-need model)
  v.qual.keyneed <- lme4::VarCorr(model.qual.keyneed) 

# The estimate of between-group (or Intercept variance) explained: 
  # Variance Explained = 1 – (Var with Predictor/Var without Predictor)
  # baseline: the unconditional quality model (v.out.qual.null)
    v.qual.keyneed.btw <- 1 - (as.numeric(v.qual.keyneed[1])/as.numeric(v.out.qual.null[1]))
    v.qual.keyneed.btw.perc <-  (1 - (as.numeric(v.qual.keyneed[1])/as.numeric(v.out.qual.null[1])))*100
  # and the estimate of within-group (or residual variance) explained is:
    v.qual.keyneed.within <- 1 - (as.numeric(v.qual.keyneed[2])/as.numeric(v.out.qual.null[2]))
    v.qual.keyneed.within.perc <- (1 - (as.numeric(v.qual.keyneed[2])/as.numeric(v.out.qual.null[2])))*100

check for random slope

\[\begin{equation} \tag{13} InteractionQuality_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

Need fulfillment, Interaction Quality, and Attitudes

random intercept

\[\begin{equation} \tag{14} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}InteractionQuality_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

check for random slope

\[\begin{equation} \tag{15} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}InteractionQuality_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

Compare models

Check for robustness

Interaction Type

random intercept

\[\begin{equation} \tag{16} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}OutgroupInteraction_{ti} + \gamma_{30}KeyNeedFulfillXOutgroupInteraction_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

check for random slope

\[\begin{equation} \tag{17} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}OutgroupInteraction_{ti} + \gamma_{30}KeyNeedFulfillXOutgroupInteraction_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

Other psychological needs

random intercept

\[\begin{equation} \tag{18} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}Autonomy_{ti} + \gamma_{30}Competence_{ti} + \gamma_{40}Relatedness_{ti} + u_{0i} + e_{ti} \\ \textrm{with}\ u_{0i} \sim \mathcal{N}(0,\tau_{00}^2)\ \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

check for random slope

\[\begin{equation} \tag{19} Attitude_{ti} = \gamma_{00} + \gamma_{10}KeyNeedFulfill_{ti} + \gamma_{20}Autonomy_{ti} + \gamma_{30}Competence_{ti} + \gamma_{40}Relatedness_{ti} + u_{0i} + u_{1i} + e_{ti} \\ \textrm{with}\ \begin{bmatrix} u_{0i}\\ u_{1i}\end{bmatrix} \sim \mathcal{N}(0,\Omega_u): \Omega_u=\begin{bmatrix} \tau_{0}^2 & \\ \tau_{01} & \tau_{1}^2 \end{bmatrix} \textrm{and}\ e_{ti} \sim \mathcal{N}(0,\sigma^2) \end{equation}\]

Student Sample

Contact Hypothesis

Need Fulfillment

Young Medical Professional Sample

Contact Hypothesis

Allport’s Conditions

Need Fulfillment

Software Information

The full session information with all relevant system information and all loaded and installed packages is available in the collapsible section below.

System Info
Table 7: R environment session info for reproducibility of results
Setting Value
version R version 4.1.1 (2021-08-10)
os macOS Big Sur 10.16
system x86_64, darwin17.0
ui X11
language (EN)
collate en_US.UTF-8
ctype en_US.UTF-8
tz Europe/Amsterdam
date 2021-10-14

Package Info
Table 8: Package info for reproducibility of results
Package Loaded version Date Source
bookdown 0.24 2021-09-02 CRAN (R 4.1.0)
data.table 1.14.0 2021-02-21 CRAN (R 4.1.0)
devtools 2.4.2 2021-06-07 CRAN (R 4.1.0)
dplyr 1.0.7 2021-06-18 CRAN (R 4.1.0)
ellipse 0.4.2 2020-05-27 CRAN (R 4.1.0)
Formula 1.2-4 2020-10-16 CRAN (R 4.1.0)
ggpattern 0.2.0 2021-10-11 Github ()
ggplot2 3.3.5 2021-06-25 CRAN (R 4.1.0)
ggthemes 4.2.4 2021-01-20 CRAN (R 4.1.0)
gridExtra 2.3 2017-09-09 CRAN (R 4.1.0)
gtsummary 1.4.2 2021-07-13 CRAN (R 4.1.0)
haven 2.4.3 2021-08-04 CRAN (R 4.1.0)
Hmisc 4.5-0 2021-02-28 CRAN (R 4.1.0)
jtools 2.1.4 2021-09-03 CRAN (R 4.1.0)
kableExtra 1.3.4 2021-02-20 CRAN (R 4.1.0)
knitr 1.36 2021-09-29 CRAN (R 4.1.0)
lattice 0.20-44 2021-05-02 CRAN (R 4.1.1)
lme4 1.1-27.1 2021-06-22 CRAN (R 4.1.0)
lubridate 1.7.10 2021-02-26 CRAN (R 4.1.0)
mada 0.5.10 2020-05-25 CRAN (R 4.1.0)
Matrix 1.3-4 2021-06-01 CRAN (R 4.1.1)
mvmeta 1.0.3 2019-12-10 CRAN (R 4.1.0)
mvtnorm 1.1-2 2021-06-07 CRAN (R 4.1.0)
nlme 3.1-152 2021-02-04 CRAN (R 4.1.1)
pander 0.6.4 2021-06-13 CRAN (R 4.1.0)
papaja 0.1.0.9997 2021-10-11 Github ()
plotly 4.9.4.9000 2021-08-28 Github ()
plyr 1.8.6 2020-03-03 CRAN (R 4.1.0)
psych 2.1.6 2021-06-18 CRAN (R 4.1.0)
RColorBrewer 1.1-2 2014-12-07 CRAN (R 4.1.0)
remedy 0.1.0 2018-12-03 CRAN (R 4.1.0)
reshape2 1.4.4 2020-04-09 CRAN (R 4.1.0)
rmarkdown 2.11 2021-09-14 CRAN (R 4.1.1)
sessioninfo 1.1.1 2018-11-05 CRAN (R 4.1.0)
stringi 1.7.5 2021-10-04 CRAN (R 4.1.1)
stringr 1.4.0 2019-02-10 CRAN (R 4.1.0)
survival 3.2-12 2021-08-13 CRAN (R 4.1.1)
tibble 3.1.5 2021-09-30 CRAN (R 4.1.0)
tidyr 1.1.4 2021-09-27 CRAN (R 4.1.0)
usethis 2.0.1 2021-02-10 CRAN (R 4.1.0)

Full Session Info (including loaded but unattached packages — for troubleshooting only)

R version 4.1.1 (2021-08-10)

Platform: x86_64-apple-darwin17.0 (64-bit)

locale: en_US.UTF-8||en_US.UTF-8||en_US.UTF-8||C||en_US.UTF-8||en_US.UTF-8

attached base packages:

  • grid
  • stats
  • graphics
  • grDevices
  • datasets
  • utils
  • methods
  • base

other attached packages:

  • lubridate(v.1.7.10)
  • reshape2(v.1.4.4)
  • stringi(v.1.7.5)
  • stringr(v.1.4.0)
  • papaja(v.0.1.0.9997)
  • kableExtra(v.1.3.4)
  • Hmisc(v.4.5-0)
  • Formula(v.1.2-4)
  • survival(v.3.2-12)
  • lattice(v.0.20-44)
  • tidyr(v.1.1.4)
  • dplyr(v.1.0.7)
  • plyr(v.1.8.6)
  • data.table(v.1.14.0)
  • mada(v.0.5.10)
  • mvmeta(v.1.0.3)
  • ellipse(v.0.4.2)
  • mvtnorm(v.1.1-2)
  • devtools(v.2.4.2)
  • usethis(v.2.0.1)
  • pander(v.0.6.4)
  • tibble(v.3.1.5)
  • sessioninfo(v.1.1.1)
  • gtsummary(v.1.4.2)
  • jtools(v.2.1.4)
  • nlme(v.3.1-152)
  • lme4(v.1.1-27.1)
  • Matrix(v.1.3-4)
  • ggpattern(v.0.2.0)
  • gridExtra(v.2.3)
  • plotly(v.4.9.4.9000)
  • RColorBrewer(v.1.1-2)
  • haven(v.2.4.3)
  • ggthemes(v.4.2.4)
  • ggplot2(v.3.3.5)
  • psych(v.2.1.6)
  • bookdown(v.0.24)
  • remedy(v.0.1.0)
  • knitr(v.1.36)
  • rmarkdown(v.2.11)

loaded via a namespace (and not attached):

  • utf8(v.1.2.2)
  • tidyselect(v.1.1.1)
  • htmlwidgets(v.1.5.4)
  • munsell(v.0.5.0)
  • effectsize(v.0.4.5)
  • chron(v.2.3-56)
  • withr(v.2.4.2)
  • colorspace(v.2.0-2)
  • highr(v.0.9)
  • rstudioapi(v.0.13)
  • DescTools(v.0.99.43)
  • labeling(v.0.4.2)
  • emmeans(v.1.6.3)
  • mnormt(v.2.0.2)
  • farver(v.2.1.0)
  • datawizard(v.0.2.0)
  • glmmTMB(v.1.1.2.3)
  • rprojroot(v.2.0.2)
  • coda(v.0.19-4)
  • vctrs(v.0.3.8)
  • generics(v.0.1.0)
  • xfun(v.0.26)
  • R6(v.2.5.1)
  • cachem(v.1.0.6)
  • assertthat(v.0.2.1)
  • scales(v.1.1.1)
  • nnet(v.7.3-16)
  • rootSolve(v.1.8.2.2)
  • gtable(v.0.3.0)
  • processx(v.3.5.2)
  • lmom(v.2.8)
  • rlang(v.0.4.11)
  • systemfonts(v.1.0.2)
  • sjPlot(v.2.8.9)
  • splines(v.4.1.1)
  • TMB(v.1.7.21)
  • lazyeval(v.0.2.2)
  • broom(v.0.7.9.9001)
  • checkmate(v.2.0.0)
  • yaml(v.2.2.1)
  • modelr(v.0.1.8)
  • backports(v.1.2.1)
  • tools(v.4.1.1)
  • ellipsis(v.0.3.2)
  • jquerylib(v.0.1.4)
  • proxy(v.0.4-26)
  • Rcpp(v.1.0.7)
  • base64enc(v.0.1-3)
  • purrr(v.0.3.4)
  • ps(v.1.6.0)
  • prettyunits(v.1.1.1)
  • rpart(v.4.1-15)
  • cluster(v.2.1.2)
  • fs(v.1.5.0)
  • magrittr(v.2.0.1)
  • lmerTest(v.3.1-3)
  • tmvnsim(v.1.0-2)
  • sjmisc(v.2.8.7)
  • pkgload(v.1.2.1)
  • hms(v.1.1.1)
  • evaluate(v.0.14)
  • xtable(v.1.8-4)
  • pbkrtest(v.0.5.1)
  • sjstats(v.0.18.1)
  • jpeg(v.0.1-9)
  • ggeffects(v.1.1.1)
  • testthat(v.3.0.4)
  • compiler(v.4.1.1)
  • gt(v.0.3.1)
  • crayon(v.1.4.1)
  • minqa(v.1.2.4)
  • htmltools(v.0.5.2)
  • mgcv(v.1.8-36)
  • tzdb(v.0.1.2)
  • expm(v.0.999-6)
  • Exact(v.3.0)
  • DBI(v.1.1.1)
  • sjlabelled(v.1.1.8)
  • MASS(v.7.3-54)
  • broom.helpers(v.1.4.0)
  • boot(v.1.3-28)
  • readr(v.2.0.2)
  • cli(v.3.0.1)
  • parallel(v.4.1.1)
  • insight(v.0.14.3)
  • forcats(v.0.5.1)
  • pkgconfig(v.2.0.3)
  • numDeriv(v.2016.8-1.1)
  • foreign(v.0.8-81)
  • xml2(v.1.3.2)
  • svglite(v.2.0.0)
  • bslib(v.0.3.0)
  • webshot(v.0.5.2)
  • estimability(v.1.3)
  • rvest(v.1.0.1)
  • callr(v.3.7.0)
  • digest(v.0.6.28)
  • parameters(v.0.14.0)
  • htmlTable(v.2.2.1)
  • gld(v.2.6.2)
  • commonmark(v.1.7)
  • nloptr(v.1.2.2.2)
  • lifecycle(v.1.0.1)
  • jsonlite(v.1.7.2)
  • interactions(v.1.1.5)
  • desc(v.1.3.0)
  • viridisLite(v.0.4.0)
  • fansi(v.0.5.0)
  • pillar(v.1.6.3)
  • fastmap(v.1.1.0)
  • httr(v.1.4.2)
  • pkgbuild(v.1.2.0)
  • glue(v.1.4.2)
  • remotes(v.2.4.0)
  • bayestestR(v.0.10.5)
  • png(v.0.1-7)
  • class(v.7.3-19)
  • sass(v.0.4.0)
  • performance(v.0.7.3)
  • rematch2(v.2.1.2)
  • latticeExtra(v.0.6-29)
  • mixmeta(v.1.1.3)
  • memoise(v.2.0.0)
  • renv(v.0.14.0)
  • e1071(v.1.7-9)


References